In [2125]:
import os
import random
import numpy as np
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
from sklearn.metrics import r2_score, confusion_matrix
import tensorflow as tf
from tensorflow.keras import layers, models
In [2126]:
os.listdir()
Out[2126]:
['princess',
 'Disney Better-Copy1.ipynb',
 '.DS_Store',
 'pooh_Extra',
 'test',
 'Disney.ipynb',
 'Untitled.ipynb',
 'disney_model.keras',
 'olaf_Extra',
 'donald_Extra',
 'Disney Better.ipynb',
 'train',
 'X.npy',
 'Y.npy',
 '.ipynb_checkpoints',
 'pumba_Extra',
 'mickey_Extra']
In [2127]:
os.listdir("princess")
Out[2127]:
['Cinderella',
 'ariel',
 'arura',
 '.DS_Store',
 'tiana',
 'ruponzel',
 'merida',
 'belle',
 'Snow White',
 'elsa',
 'jasmine',
 'anna']
In [2128]:
os.listdir("test")
Out[2128]:
['pumba', 'donald', '.DS_Store', 'mickey', 'pooh', 'minion', 'olaf']
In [2129]:
n = 100  # images are resized to n x n pixels
try:
    # reuse cached arrays if a previous run saved them
    Y = np.load("Y.npy")
    X = np.load("X.npy")
    click = 1
except FileNotFoundError:
    click = 0  # no cache yet: rebuild X and Y from the image folders below
In [2130]:
if click == 0:
    X = []
    X_Files = []
    Y = []
    # walk each top-level folder once; every subfolder name is a class label
    for top in ["princess", "test", "train"]:
        for name in tqdm(os.listdir(top)):
            if name == ".DS_Store":
                continue
            folder = top + "/" + name
            for fname in os.listdir(folder):
                if fname != ".DS_Store":
                    X_Files.append(folder + "/" + fname)
                    Y.append(name)
In [2131]:
def rescale(img, n):
    """Read an image from disk and resize it to n x n pixels (BGR)."""
    image = cv2.imread(img)
    if image is None:
        raise FileNotFoundError(f"cv2.imread could not read {img}")
    return cv2.resize(image, (n, n))
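A quick sanity check of the helper, assuming the discovery loop above has populated X_Files (it is skipped when the cached .npy arrays were loaded):

sample = rescale(X_Files[0], n)  # first discovered image, resized
sample.shape                     # expected: (100, 100, 3), channels last, BGR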
In [2134]:
if click == 0:
    # decode and resize in three chunks (this cell and the next two) so a
    # crash partway through does not lose all progress
    for i in tqdm(range(0, 3000)):
        X.append(rescale(X_Files[i], n))
In [2135]:
if click == 0:
    for i in tqdm(range(3000, 6000)):
        X.append(rescale(X_Files[i], n))
In [2136]:
if click == 0:
    for i in tqdm(range(6000, len(X_Files))):
        X.append(rescale(X_Files[i], n))
In [2137]:
X[0].shape
Out[2137]:
(100, 100, 3)
In [2138]:
len(Y)
Out[2138]:
8698
In [2139]:
if click == 0:
    np.save("X.npy", X)
    np.save("Y.npy", Y)
In [2140]:
np.unique(Y, return_counts=True)
Out[2140]:
(array(['Cinderella', 'Snow White', 'anna', 'ariel', 'arura', 'belle',
        'donald', 'elsa', 'jasmine', 'merida', 'mickey', 'minion', 'olaf',
        'pooh', 'pumba', 'ruponzel', 'tiana'], dtype='<U10'),
 array([615, 384, 328, 304, 377, 280, 580, 597, 500, 378, 523, 781, 686,
        677, 701, 519, 468]))
In [2142]:
ld = LabelEncoder()
ld.fit(np.unique(Y))
Y_t = ld.transform(Y)
In [2143]:
def trans(n, N):
    """Return a one-hot list of length N with a 1 at index n."""
    return [1 if i == n else 0 for i in range(N)]
In [2144]:
# Note: this one-hot Y is never used for training; model.fit below consumes
# the integer labels Y_t with a sparse categorical loss.
Y = np.array([trans(y, len(np.unique(Y_t))) for y in Y_t])
X = np.array(X)
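For reference, Keras ships a built-in equivalent of the hand-rolled trans helper; a minimal sketch with tf.keras.utils.to_categorical:

# one-hot encode the integer labels with the stock Keras utility
Y_onehot = tf.keras.utils.to_categorical(Y_t, num_classes=len(np.unique(Y_t)))
# Y_onehot.shape -> (8698, 17), matching the array built above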
In [2145]:
Y_t
Out[2145]:
array([ 0,  0,  0, ..., 12, 12, 12])
In [2147]:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_t, test_size=0.37)  # unseeded: the split changes on each run
In [2149]:
Y_train
Out[2149]:
array([ 7,  0,  7, ...,  7, 13, 13])
In [2150]:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(layers.AvgPool2D((2, 2)))
model.add(layers.BatchNormalization(synchronized=True))  # synchronized only matters under tf.distribute; harmless on a single device
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.AvgPool2D((2, 2)))
model.add(layers.BatchNormalization(synchronized=True))
model.add(layers.Conv2D(128, (2, 2), activation='relu'))
model.add(layers.AvgPool2D((2, 2)))
model.add(layers.BatchNormalization(synchronized=True))
model.add(layers.Conv2D(256, (3, 3), activation='relu'))
model.add(layers.AvgPool2D((2, 2)))
model.add(layers.BatchNormalization(synchronized=True))
model.add(layers.Conv2D(512, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.BatchNormalization(synchronized=True))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(17, activation='softmax'))
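Note that pixel values enter the network unscaled (0 to 255). A common refinement, sketched here under the assumption of TF 2.6+ where layers.Rescaling exists, is to make normalization the first layer so a saved model carries its own preprocessing:

# a minimal sketch: the same opening conv as above, but with 0-255 inputs
# scaled to 0-1 inside the model via a Rescaling layer
demo = models.Sequential()
demo.add(layers.Rescaling(1.0 / 255, input_shape=(100, 100, 3)))
demo.add(layers.Conv2D(32, (3, 3), activation='relu'))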
In [2151]:
model.summary()
Model: "sequential_36"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_179 (Conv2D)         (None, 98, 98, 32)        896       
                                                                 
 average_pooling2d_146 (Aver  (None, 49, 49, 32)       0         
 agePooling2D)                                                   
                                                                 
 batch_normalization (BatchN  (None, 49, 49, 32)       128       
 ormalization)                                                   
                                                                 
 conv2d_180 (Conv2D)         (None, 47, 47, 64)        18496     
                                                                 
 average_pooling2d_147 (Aver  (None, 23, 23, 64)       0         
 agePooling2D)                                                   
                                                                 
 batch_normalization_1 (Batc  (None, 23, 23, 64)       256       
 hNormalization)                                                 
                                                                 
 conv2d_181 (Conv2D)         (None, 22, 22, 128)       32896     
                                                                 
 average_pooling2d_148 (Aver  (None, 11, 11, 128)      0         
 agePooling2D)                                                   
                                                                 
 batch_normalization_2 (Batc  (None, 11, 11, 128)      512       
 hNormalization)                                                 
                                                                 
 conv2d_182 (Conv2D)         (None, 9, 9, 256)         295168    
                                                                 
 average_pooling2d_149 (Aver  (None, 4, 4, 256)        0         
 agePooling2D)                                                   
                                                                 
 batch_normalization_3 (Batc  (None, 4, 4, 256)        1024      
 hNormalization)                                                 
                                                                 
 conv2d_183 (Conv2D)         (None, 2, 2, 512)         1180160   
                                                                 
 max_pooling2d_33 (MaxPoolin  (None, 1, 1, 512)        0         
 g2D)                                                            
                                                                 
 batch_normalization_4 (Batc  (None, 1, 1, 512)        2048      
 hNormalization)                                                 
                                                                 
 flatten_36 (Flatten)        (None, 512)               0         
                                                                 
 dense_107 (Dense)           (None, 64)                32832     
                                                                 
 dense_108 (Dense)           (None, 32)                2080      
                                                                 
 dense_109 (Dense)           (None, 17)                561       
                                                                 
=================================================================
Total params: 1,567,057
Trainable params: 1,565,073
Non-trainable params: 1,984
_________________________________________________________________
In [2152]:
X_train[0].shape
Out[2152]:
(100, 100, 3)
In [2153]:
Y_train[0]
Out[2153]:
7
In [2154]:
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              # the final layer is already a softmax, so the loss must read
              # probabilities rather than logits
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

history = model.fit(X_train, Y_train, epochs=30, 
                    validation_data=(X_test, Y_test))
Epoch 1/30
172/172 [==============================] - 56s 306ms/step - loss: 1.7634 - accuracy: 0.4877 - val_loss: 1.7273 - val_accuracy: 0.4970
Epoch 2/30
172/172 [==============================] - 46s 268ms/step - loss: 1.1829 - accuracy: 0.6543 - val_loss: 1.5016 - val_accuracy: 0.6080
Epoch 3/30
172/172 [==============================] - 48s 279ms/step - loss: 0.9444 - accuracy: 0.7302 - val_loss: 0.9563 - val_accuracy: 0.7232
Epoch 4/30
172/172 [==============================] - 50s 290ms/step - loss: 0.7889 - accuracy: 0.7726 - val_loss: 1.2353 - val_accuracy: 0.6201
Epoch 5/30
172/172 [==============================] - 50s 290ms/step - loss: 0.6541 - accuracy: 0.8140 - val_loss: 0.9495 - val_accuracy: 0.7167
Epoch 6/30
172/172 [==============================] - 50s 291ms/step - loss: 0.5652 - accuracy: 0.8312 - val_loss: 0.7746 - val_accuracy: 0.7953
Epoch 7/30
172/172 [==============================] - 50s 291ms/step - loss: 0.4601 - accuracy: 0.8651 - val_loss: 0.9494 - val_accuracy: 0.7518
Epoch 8/30
172/172 [==============================] - 52s 303ms/step - loss: 0.3794 - accuracy: 0.8819 - val_loss: 0.7725 - val_accuracy: 0.8055
Epoch 9/30
172/172 [==============================] - 46s 266ms/step - loss: 0.3862 - accuracy: 0.8826 - val_loss: 0.8240 - val_accuracy: 0.7841
Epoch 10/30
172/172 [==============================] - 46s 270ms/step - loss: 0.2868 - accuracy: 0.9148 - val_loss: 0.7113 - val_accuracy: 0.8204
Epoch 11/30
172/172 [==============================] - 51s 296ms/step - loss: 0.2249 - accuracy: 0.9323 - val_loss: 0.8221 - val_accuracy: 0.8055
Epoch 12/30
172/172 [==============================] - 52s 300ms/step - loss: 0.2413 - accuracy: 0.9274 - val_loss: 0.8747 - val_accuracy: 0.8009
Epoch 13/30
172/172 [==============================] - 46s 266ms/step - loss: 0.2238 - accuracy: 0.9285 - val_loss: 0.9970 - val_accuracy: 0.7437
Epoch 14/30
172/172 [==============================] - 50s 293ms/step - loss: 0.2097 - accuracy: 0.9337 - val_loss: 0.8250 - val_accuracy: 0.8099
Epoch 15/30
172/172 [==============================] - 52s 302ms/step - loss: 0.1718 - accuracy: 0.9423 - val_loss: 1.0071 - val_accuracy: 0.7841
Epoch 16/30
172/172 [==============================] - 50s 289ms/step - loss: 0.1821 - accuracy: 0.9416 - val_loss: 0.8491 - val_accuracy: 0.8052
Epoch 17/30
172/172 [==============================] - 48s 279ms/step - loss: 0.1500 - accuracy: 0.9538 - val_loss: 0.8749 - val_accuracy: 0.8093
Epoch 18/30
172/172 [==============================] - 47s 272ms/step - loss: 0.1714 - accuracy: 0.9421 - val_loss: 0.9296 - val_accuracy: 0.8021
Epoch 19/30
172/172 [==============================] - 46s 270ms/step - loss: 0.1395 - accuracy: 0.9520 - val_loss: 0.8547 - val_accuracy: 0.8158
Epoch 20/30
172/172 [==============================] - 50s 290ms/step - loss: 0.1680 - accuracy: 0.9469 - val_loss: 1.1157 - val_accuracy: 0.7583
Epoch 21/30
172/172 [==============================] - 49s 288ms/step - loss: 0.1333 - accuracy: 0.9573 - val_loss: 0.8899 - val_accuracy: 0.8009
Epoch 22/30
172/172 [==============================] - 50s 288ms/step - loss: 0.1237 - accuracy: 0.9595 - val_loss: 2.1045 - val_accuracy: 0.5701
Epoch 23/30
172/172 [==============================] - 50s 289ms/step - loss: 0.1268 - accuracy: 0.9602 - val_loss: 0.8818 - val_accuracy: 0.8158
Epoch 24/30
172/172 [==============================] - 49s 287ms/step - loss: 0.0846 - accuracy: 0.9717 - val_loss: 0.8955 - val_accuracy: 0.8148
Epoch 25/30
172/172 [==============================] - 50s 290ms/step - loss: 0.0825 - accuracy: 0.9701 - val_loss: 0.9242 - val_accuracy: 0.8058
Epoch 26/30
172/172 [==============================] - 49s 285ms/step - loss: 0.0997 - accuracy: 0.9670 - val_loss: 1.2180 - val_accuracy: 0.7881
Epoch 27/30
172/172 [==============================] - 46s 269ms/step - loss: 0.0849 - accuracy: 0.9723 - val_loss: 0.9322 - val_accuracy: 0.8055
Epoch 28/30
172/172 [==============================] - 52s 302ms/step - loss: 0.1069 - accuracy: 0.9631 - val_loss: 1.1223 - val_accuracy: 0.7801
Epoch 29/30
172/172 [==============================] - 48s 280ms/step - loss: 0.1303 - accuracy: 0.9569 - val_loss: 1.1692 - val_accuracy: 0.7633
Epoch 30/30
172/172 [==============================] - 49s 284ms/step - loss: 0.1179 - accuracy: 0.9600 - val_loss: 0.8824 - val_accuracy: 0.8257
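The history object returned by fit is captured but never inspected; a minimal learning-curve plot makes the pattern above visible (training accuracy climbs steadily while validation accuracy plateaus around 0.80 and oscillates, a typical overfitting signature):

# plot train vs. validation accuracy across the 30 epochs
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()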
In [2155]:
model.save("disney_model.keras")
# to restore later: model = tf.keras.models.load_model("disney_model.keras")
In [2156]:
y_pred = np.argmax(model.predict(X_test), axis = 1)
y_true = Y_test
101/101 [==============================] - 6s 54ms/step
In [2157]:
y_true
Out[2157]:
array([6, 7, 9, ..., 9, 8, 3])
In [2158]:
y_pred
Out[2158]:
array([7, 7, 9, ..., 9, 8, 3])
In [2159]:
model.predict(X_test)
101/101 [==============================] - 6s 55ms/step
Out[2159]:
array([[1.62356228e-04, 4.88225442e-06, 2.75278604e-03, ...,
        1.52781522e-05, 3.26160429e-04, 5.00970928e-05],
       [3.20731499e-03, 2.05876582e-04, 6.20071916e-03, ...,
        1.33721181e-03, 1.47355795e-02, 4.89634171e-04],
       [1.47258206e-05, 5.64498862e-07, 2.15134114e-05, ...,
        3.60282684e-06, 5.07090148e-03, 2.96593157e-06],
       ...,
       [2.00498391e-08, 2.28067663e-07, 3.99716955e-05, ...,
        8.04494266e-05, 6.65042216e-06, 1.45933100e-05],
       [1.22353318e-03, 6.50591048e-09, 5.26319539e-08, ...,
        3.45867693e-06, 7.24647361e-06, 1.05142271e-05],
       [9.58133114e-07, 3.01451281e-10, 2.75236509e-08, ...,
        3.12424433e-08, 7.45526268e-05, 2.56427910e-07]], dtype=float32)
In [2160]:
# caveat: R² treats the integer class ids as ordered quantities, so this value
# is not a meaningful classification metric; accuracy and the confusion matrix
# below are the relevant measures
r2_score(y_true, y_pred)
Out[2160]:
0.6061704510413575
In [2161]:
# overall accuracy: fraction of exact matches, as a percentage
round(np.mean(y_true == y_pred), 4) * 100
Out[2161]:
82.57
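The same figure is available as a one-liner from scikit-learn, which is already imported:

metrics.accuracy_score(y_true, y_pred)  # ~0.8257, matching the computation above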
In [2162]:
cm = confusion_matrix(y_true, y_pred)
# row-normalize to per-class percentages; cm holds integers, so each row is
# truncated toward zero on assignment
for i in range(len(cm)):
    cm[i] = cm[i] * 100 / sum(cm[i])

cm
Out[2162]:
array([[77,  0,  0,  0,  3,  2,  2,  6,  0,  0,  0,  0,  1,  0,  0,  0,
         1],
       [ 2, 81,  0,  0,  1,  0,  0,  6,  0,  0,  1,  0,  0,  0,  0,  1,
         0],
       [ 0,  0, 75,  0,  0,  1,  0, 16,  0,  2,  0,  0,  0,  0,  0,  0,
         0],
       [ 1,  0,  0, 88,  0,  2,  0,  0,  0,  0,  0,  0,  0,  0,  0,  4,
         0],
       [ 9,  0,  1,  1, 76,  0,  0,  0,  3,  0,  0,  0,  0,  0,  1,  2,
         0],
       [ 0,  0,  2,  1,  8, 62,  0,  1,  4,  3,  0,  0,  0,  0,  0,  9,
         0],
       [ 0,  0,  0,  0,  0,  0, 84,  1,  2,  0,  0,  2,  0,  2,  1,  0,
         0],
       [ 2,  0,  9,  0,  1,  0,  0, 85,  0,  0,  0,  0,  0,  0,  0,  0,
         0],
       [ 3,  1,  0,  1,  2,  0,  3,  1, 79,  0,  0,  0,  1,  0,  1,  2,
         1],
       [ 0,  0,  2,  0,  0,  0,  0,  0,  0, 87,  0,  0,  2,  0,  1,  1,
         0],
       [ 0,  1,  0,  0,  0,  0,  4,  0,  1,  0, 86,  0,  2,  0,  0,  0,
         0],
       [ 1,  0,  0,  0,  0,  0,  3,  0,  0,  0,  0, 91,  0,  0,  0,  0,
         0],
       [ 0,  0,  0,  0,  0,  0,  2,  4,  0,  0,  0,  1, 87,  0,  0,  0,
         0],
       [ 0,  0,  0,  0,  0,  0,  1,  0,  0,  0,  0,  1,  0, 89,  0,  3,
         0],
       [ 0,  0,  0,  0,  0,  0,  1,  0,  0,  1,  0,  0,  0,  2, 86,  3,
         1],
       [ 2,  1,  1,  2, 10,  3,  1,  1,  1,  1,  1,  0,  0,  0,  1, 65,
         5],
       [ 2,  2,  1,  2,  2,  3,  0,  0,  2,  0,  0,  0,  0,  0,  0,  1,
        78]])
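scikit-learn can also produce the row-normalized matrix directly (as exact fractions rather than truncated integer percentages), assuming sklearn 0.22+:

cm_norm = confusion_matrix(y_true, y_pred, normalize='true')  # each row sums to 1.0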
In [2163]:
# recover the class names in label-encoder order
dl = ld.inverse_transform(np.arange(len(np.unique(Y_t))))
dl
Out[2163]:
array(['Cinderella', 'Snow White', 'anna', 'ariel', 'arura', 'belle',
       'donald', 'elsa', 'jasmine', 'merida', 'mickey', 'minion', 'olaf',
       'pooh', 'pumba', 'ruponzel', 'tiana'], dtype='<U10')
In [2164]:
cm_display = metrics.ConfusionMatrixDisplay(confusion_matrix = cm, display_labels = np.char.lower(dl))
cm_display.plot()
plt.title("Confusion Matrix")
plt.xticks(rotation=90)
plt.show()
In [2165]:
# argsort of the negated array gives indices in descending order of value;
# the prediction helper below uses this to rank class probabilities
arr = np.array([5, 3, 4, 10, 30])
np.argsort(-arr)
Out[2165]:
array([4, 3, 0, 2, 1])
In [2166]:
def prediction(filename):
    """Show the query image beside a random training image of the predicted
    class, and return the top-5 class probabilities as percentages."""
    x = np.array([rescale(filename, n)])
    y = model.predict(x)[0]
    arr = np.argsort(-y)[:5]          # top-5 class indices, best first
    index = arr[0]
    f1 = cv2.imread(filename)
    y_t = y[arr] * 100
    arr = ld.inverse_transform(arr)
    # pick a random training example of the predicted class for comparison
    ind_arr = np.where(Y_train == index)[0]
    figure2 = X_train[random.choice(ind_arr)]
    # cv2.resize takes (width, height), i.e. (cols, rows)
    figure2 = cv2.resize(figure2, (f1.shape[1], f1.shape[0]))
    D = dict(zip(np.char.upper(arr), y_t))
    # one row, two columns: query image on the left, training example on the
    # right; convert OpenCV's BGR to RGB for matplotlib
    f, axarr = plt.subplots(1, 2)
    axarr[0].imshow(cv2.cvtColor(f1, cv2.COLOR_BGR2RGB))
    axarr[1].imshow(cv2.cvtColor(figure2, cv2.COLOR_BGR2RGB))
    plt.show()
    return D
In [2167]:
prediction("/Users/viralchitlangia/Documents/Screenshot 2024-01-13 at 11.29.43 PM.png")
1/1 [==============================] - 0s 24ms/step
Out[2167]:
{'ELSA': 60.44603,
 'ARURA': 15.674378,
 'ANNA': 12.815778,
 'SNOW WHITE': 7.179574,
 'OLAF': 2.8892725}
In [2168]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-13 at 23.32.24.jpeg")
1/1 [==============================] - 0s 24ms/step
Out[2168]:
{'SNOW WHITE': 55.19971,
 'CINDERELLA': 20.143757,
 'MERIDA': 8.526195,
 'ANNA': 6.487473,
 'MINION': 3.7672057}
In [2169]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2023-11-12 at 21.31.09.jpeg")
1/1 [==============================] - 0s 24ms/step
Out[2169]:
{'MICKEY': 41.360615,
 'SNOW WHITE': 30.51157,
 'RUPONZEL': 16.46349,
 'TIANA': 7.172701,
 'JASMINE': 1.3063393}
In [2170]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-13 at 23.36.38.jpeg")
1/1 [==============================] - 0s 32ms/step
Out[2170]:
{'ARURA': 31.259287,
 'MINION': 24.909922,
 'OLAF': 16.80799,
 'ANNA': 6.2082453,
 'POOH': 5.856629}
In [2171]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-13 at 23.37.45.jpeg")
1/1 [==============================] - 0s 26ms/step
Out[2171]:
{'SNOW WHITE': 84.61991,
 'JASMINE': 8.797051,
 'ARURA': 2.0185306,
 'OLAF': 1.7526255,
 'CINDERELLA': 1.4808806}
In [2172]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-13 at 23.40.12.jpeg")
1/1 [==============================] - 0s 28ms/step
Out[2172]:
{'ELSA': 60.149372,
 'SNOW WHITE': 21.20211,
 'ANNA': 10.046525,
 'OLAF': 6.379779,
 'JASMINE': 1.1096761}
In [2173]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-13 at 23.43.20.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2173]:
{'TIANA': 67.97027,
 'JASMINE': 13.140366,
 'RUPONZEL': 10.1687,
 'DONALD': 4.715852,
 'ARURA': 2.0152614}
In [2174]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.01.12.jpeg")
1/1 [==============================] - 0s 28ms/step
Out[2174]:
{'MERIDA': 51.025055,
 'ANNA': 12.029904,
 'DONALD': 8.66766,
 'SNOW WHITE': 8.092869,
 'OLAF': 7.5700707}
In [2175]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2023-12-26 at 18.11.25 (1).jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2175]:
{'ELSA': 46.803024,
 'ANNA': 31.292994,
 'DONALD': 18.311522,
 'OLAF': 1.6860468,
 'MICKEY': 0.6694483}
In [2176]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2023-12-26 at 18.42.30 (1).jpeg")
1/1 [==============================] - 0s 27ms/step
Out[2176]:
{'ARURA': 67.58672,
 'CINDERELLA': 18.991705,
 'SNOW WHITE': 8.642586,
 'JASMINE': 3.1455715,
 'PUMBA': 0.4698818}
In [2177]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.23.35.jpeg")
1/1 [==============================] - 0s 27ms/step
Out[2177]:
{'PUMBA': 87.32152,
 'ARURA': 2.4389808,
 'MICKEY': 2.3336108,
 'DONALD': 2.1472142,
 'TIANA': 1.6719378}
In [2178]:
prediction("/Users/viralchitlangia/Documents/Screenshot 2024-01-14 at 12.07.19 AM.png")
1/1 [==============================] - 0s 23ms/step
Out[2178]:
{'ARURA': 72.21816,
 'CINDERELLA': 18.640802,
 'SNOW WHITE': 8.464644,
 'TIANA': 0.32482988,
 'POOH': 0.12285559}
In [2179]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.08.36.jpeg")
1/1 [==============================] - 0s 24ms/step
Out[2179]:
{'ANNA': 94.24151,
 'SNOW WHITE': 3.1033213,
 'ELSA': 1.9705905,
 'BELLE': 0.23691303,
 'MERIDA': 0.18797912}
In [2180]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.10.07.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2180]:
{'ANNA': 96.64932,
 'ARURA': 1.1494915,
 'ELSA': 1.0625942,
 'OLAF': 0.7279662,
 'BELLE': 0.16554715}
In [2181]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.11.51.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2181]:
{'PUMBA': 73.97549,
 'TIANA': 16.584164,
 'BELLE': 3.454937,
 'MERIDA': 1.226212,
 'SNOW WHITE': 1.2135628}
In [2182]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.15.47.jpeg")
1/1 [==============================] - 0s 24ms/step
Out[2182]:
{'POOH': 89.53995,
 'PUMBA': 4.815385,
 'SNOW WHITE': 3.928429,
 'OLAF': 0.90828204,
 'MINION': 0.20303568}
In [2183]:
prediction("/Users/viralchitlangia/Documents/Screenshot 2024-01-14 at 12.18.01 AM.png")
1/1 [==============================] - 0s 36ms/step
Out[2183]:
{'SNOW WHITE': 94.60641,
 'ARURA': 4.007886,
 'ANNA': 0.67646134,
 'CINDERELLA': 0.3037353,
 'ELSA': 0.20216419}
In [2184]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.19.22.jpeg")
1/1 [==============================] - 0s 27ms/step
Out[2184]:
{'ANNA': 33.37899,
 'MICKEY': 29.787678,
 'SNOW WHITE': 10.118279,
 'BELLE': 7.0103183,
 'ELSA': 6.90603}
In [2185]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.21.43.jpeg")
1/1 [==============================] - 0s 27ms/step
Out[2185]:
{'SNOW WHITE': 89.125435,
 'ARURA': 4.3205733,
 'OLAF': 3.1775522,
 'JASMINE': 2.0810754,
 'DONALD': 1.0208249}
In [2186]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.25.15.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2186]:
{'DONALD': 41.135662,
 'SNOW WHITE': 27.207779,
 'PUMBA': 9.558139,
 'POOH': 8.895779,
 'TIANA': 3.8825696}
In [2187]:
prediction("/Users/viralchitlangia/Downloads/WhatsApp Image 2024-01-14 at 00.25.36.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2187]:
{'ELSA': 68.98206,
 'ANNA': 20.265482,
 'DONALD': 8.995256,
 'OLAF': 0.4130005,
 'MERIDA': 0.3863536}
In [2188]:
prediction("/Users/viralchitlangia/Downloads/ariel.jpeg")
1/1 [==============================] - 0s 25ms/step
Out[2188]:
{'ARIEL': 99.773766,
 'OLAF': 0.0952457,
 'CINDERELLA': 0.06776894,
 'ARURA': 0.04054197,
 'BELLE': 0.015413552}
In [2189]:
prediction("/Users/viralchitlangia/Downloads/Winnie.jpg")
1/1 [==============================] - 0s 28ms/step
Out[2189]:
{'POOH': 99.99856,
 'ARIEL': 0.0004213578,
 'DONALD': 0.00032803084,
 'BELLE': 0.00027633217,
 'OLAF': 0.0002632521}
In [2190]:
prediction("/Users/viralchitlangia/Downloads/Jerry.jpg")
1/1 [==============================] - 0s 24ms/step
Out[2190]:
{'POOH': 56.90267,
 'BELLE': 38.74346,
 'RUPONZEL': 2.3882086,
 'TIANA': 0.75440854,
 'MERIDA': 0.52598214}